cache[i] = l1e_empty();
}
-void *map_domain_page(unsigned long pfn)
+void *map_domain_pages(unsigned long pfn, unsigned int order)
{
unsigned long va;
- unsigned int idx, cpu = smp_processor_id();
+ unsigned int idx, i, flags, cpu = smp_processor_id();
l1_pgentry_t *cache = mapcache;
#ifndef NDEBUG
unsigned int flush_count = 0;
local_flush_tlb();
shadow_epoch[cpu] = ++epoch;
}
+
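+ /* A run is usable only if none of its 2^order slots is still present. */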
+ flags = 0;
+ for ( i = 0; i < (1U << order); i++ )
+ flags |= l1e_get_flags(cache[idx+i]);
}
- while ( l1e_get_flags(cache[idx]) & _PAGE_PRESENT );
+ while ( flags & _PAGE_PRESENT );
- cache[idx] = l1e_from_pfn(pfn, __PAGE_HYPERVISOR);
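+ /* Claim the free run: map pfn through pfn + 2^order - 1 into consecutive slots. */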
+ for ( i = 0; i < (1U << order); i++ )
+ cache[idx+i] = l1e_from_pfn(pfn+i, __PAGE_HYPERVISOR);
spin_unlock(&map_lock);
return (void *)va;
}
-void unmap_domain_page(void *va)
+void unmap_domain_pages(void *va, unsigned int order)
{
- unsigned int idx;
+ unsigned int idx, i;
ASSERT((void *)MAPCACHE_VIRT_START <= va);
ASSERT(va < (void *)MAPCACHE_VIRT_END);
idx = ((unsigned long)va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
- l1e_add_flags(mapcache[idx], READY_FOR_TLB_FLUSH);
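+ /* Mark every slot in the run; the TLB flush itself is deferred. */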
+ for ( i = 0; i < (1U << order); i++ )
+ l1e_add_flags(mapcache[idx+i], READY_FOR_TLB_FLUSH);
}
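
For illustration, a hypothetical caller of the widened interface might look like this (a sketch only; pfn and the order-1 size are assumptions, not part of the patch):

    void *va = map_domain_pages(pfn, 1);   /* maps frames pfn and pfn+1 back to back */
    memset(va, 0, 2 * PAGE_SIZE);          /* the whole two-page run is addressable */
    unmap_domain_pages(va, 1);             /* same order: marks both slots for flush */

Note that the order passed to unmap_domain_pages() must match the one used at map time, since the unmap side recomputes only the starting slot from the VA and walks 2^order entries from there.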
#include <xen/config.h>
#include <xen/mm.h>
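+/* Single-page compatibility wrappers: order 0 covers exactly one frame. */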
+#define map_domain_page(pfn) map_domain_pages(pfn,0)
+#define unmap_domain_page(va) unmap_domain_pages(va,0)
+
#ifdef CONFIG_DOMAIN_PAGE
/*
- * Maps a given page frame, returning the mmap'ed virtual address. The page is
- * now accessible until a corresponding call to unmap_domain_page().
+ * Maps a given range of page frames, returning the mapped virtual address. The
+ * pages are now accessible until a corresponding call to unmap_domain_pages().
*/
-extern void *map_domain_page(unsigned long pfn);
+extern void *map_domain_pages(unsigned long pfn, unsigned int order);
/*
- * Pass a VA within a page previously mapped with map_domain_page().
- * That page will then be removed from the mapping lists.
+ * Pass a VA within the first page of a range previously mapped with
+ * map_domain_pages(). Those pages will then be removed from the mapping lists.
*/
-extern void unmap_domain_page(void *va);
+extern void unmap_domain_pages(void *va, unsigned int order);
#define DMCACHE_ENTRY_VALID 1U
#define DMCACHE_ENTRY_HELD 2U
#else /* !CONFIG_DOMAIN_PAGE */
-#define map_domain_page(pfn) phys_to_virt((pfn)<<PAGE_SHIFT)
-#define unmap_domain_page(va) ((void)(va))
+#define map_domain_pages(pfn,order) phys_to_virt((pfn)<<PAGE_SHIFT)
+#define unmap_domain_pages(va,order) ((void)((void)(va),(void)(order)))
struct domain_mmap_cache {
};
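
When CONFIG_DOMAIN_PAGE is disabled, the same calls reduce to direct-map address arithmetic, so callers compile unchanged in either configuration (sketch; pfn is assumed):

    void *va = map_domain_pages(pfn, 1);   /* expands to phys_to_virt(pfn << PAGE_SHIFT) */
    unmap_domain_pages(va, 1);             /* evaluates its arguments, emits no code */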